Collecting data

For each survey (separated by country), we collect the number of eels, their weights (divided by haul duration), and the mean, minimum and maximum length of eels caught per year and quarter, plus the locations of the hauls in which eels were caught.

# For each survey code, query DATRAS for the list of years with data.
yearssurvey <- lapply(surveys, getSurveyYearList)


# For each survey and each of its years, download the catch summary for
# the target species (aphia). The DATRAS web-service calls occasionally
# fail, so every year is retried up to 3 times before giving up, in
# which case NULL is recorded for that year.
summarycatch <- lapply(seq_along(surveys), function(isurv) {
  print(surveys[isurv])  # progress: which survey is being fetched
  years <- yearssurvey[[isurv]]
  lapply(years, function(y, isurv) {
    print(y)  # progress: which year is being fetched
    done <- FALSE
    # "attempt" rather than "try" -- the original name shadowed base::try()
    attempt <- 0
    while (!done && attempt < 3) {
      tryCatch({
        quart <- getSurveyYearQuarterList(surveys[isurv], y)
        catch <- getCatchWgt(surveys[isurv], y, quart, aphia)
        done <- TRUE
        if (is.null(catch)) return(NULL)
        return(catch)
      }, error = function(e) {
        # count the failure and loop around for another attempt;
        # <<- reaches the counter in the enclosing function scope
        attempt <<- attempt + 1
      })
    }
    # all attempts failed: return NULL for this year
    NULL
  }, isurv = isurv)
})
# Cache the downloaded data so the later chunks can run offline.
save.image("datras.rdata")

Summary of what is in the data

In the following table, we see a few summary statistics of what is caught.

library(flextable)
# Restore the DATRAS download cached by the data-collection chunk.
load("datras.rdata")

# Haul positions: flatten the nested list (survey -> year/quarter -> $pos)
# into a single data frame. bind_rows() accepts a list of data frames
# directly, so the do.call() wrapper is unnecessary.
pos <- bind_rows(lapply(summarycatch, function(surv) {
  bind_rows(lapply(surv, function(quart) quart$pos))
}))

# Per-survey/quarter summary statistics, flattened into one data frame.
# Some summaries lack a CatchWgtHour column (presumably no timed hauls)
# -- default it to 0. When the length-class detail columns are present
# (CatCatchWgt/LngtClass), drop them so every element has the same
# columns before binding.
statcatch <- bind_rows(lapply(summarycatch, function(surv) {
  bind_rows(lapply(surv, function(quart) {
    s <- quart$summary
    if (!("CatchWgtHour" %in% names(s))) {
      s$CatchWgtHour <- 0
    }
    if ("CatCatchWgt" %in% names(s)) {
      s <- s %>% select(-LngtClass, -CatCatchWgt)
    }
    s
  }))
}))




library(ggplot2)

# round() a scalar, returning NA when the value is +/-Inf
# (min()/max() with na.rm = TRUE over an all-NA group return Inf/-Inf).
# Factored out because the original computed each min/max twice.
finite_round <- function(v) {
  if (is.infinite(v)) NA else round(v)
}

# One row per survey/quarter/country: average catch weight, CPUE,
# average number caught, frequency of occurrence (% of hauls with
# eels) and length statistics (mean weighted by the number caught).
mytable <- statcatch %>%
  group_by(Survey, Quarter, Country) %>%
  summarise(averageCatchWgt = round(mean(CatchWgt, na.rm = TRUE)),
            averageCatchWgtHour = round(mean(CatchWgtHour, na.rm = TRUE)),
            averageCatchN = round(mean(number, na.rm = TRUE)),
            freqOc = round(mean(nnonNA / nhaul * 100, na.rm = TRUE)),
            meanL = round(weighted.mean(meanL, number, na.rm = TRUE)),
            minL = finite_round(min(minL, na.rm = TRUE)),
            maxL = finite_round(max(maxL, na.rm = TRUE))) %>%
  arrange(Survey, Quarter)

# Survey/quarter/country combinations retained for the maps below.
# NOTE(review): the name "surveym10" and the surrounding text say
# "at least 10 eels on average", but the threshold here is 6 --
# confirm which is intended.
surveym10 <- mytable %>%
  filter(averageCatchN >= 6) %>%
  select(Survey, Quarter, Country)

flextable(mytable %>% filter(averageCatchN > 0))

We see in the table that in most cases, eels are caught in less than 1% of hauls and there are very few surveys in which we collect more than 10 eels on average: BITS Quart 3 - BITS Quart 4 - BITS Quart 4 - BITS Quart 4 - BTS Quart 3 - BTS Quart 3 - DYFS Quart 3 - DYFS Quart 3 - DYFS Quart 4 - FR-CGFS Quart 4 - NS-IBTS Quart 2 - NS-IBTS Quart 3 - NS-IBTS Quart 3 - NS-IBTS Quart 3 - NS-IBTS Quart 3 - NS-IBTS Quart 3 - NS-IBTS Quart 4 - NS-IBTS Quart 4 - SNS Quart 3 - SNS Quart 4. Except in SP-North, where we have small eels (13 cm on average, but in very limited numbers), it is mostly large yellow or silver eels. Some lengths are strange (e.g. in FR-CGFS), so checks are needed there.

# Force the eel count to zero wherever the catch weight is zero
# (presumably zero-weight rows should not contribute a count --
# TODO confirm against getCatchWgt()'s output).
# NOTE(review): ifelse() propagates NA, so rows with CatchWgt == NA
# get an NA number; verify this is intended.
statcatch <- statcatch %>%
  ungroup() %>%
  mutate(number = ifelse(CatchWgt==0, 0, number))

# Survey/quarter combinations in which eels were never caught (zero
# frequency of occurrence over all years); used below with anti_join()
# to drop them from the time-series plots.
# The original also summarised mean(CatchWgt) here, but that column was
# immediately discarded by select(), so it is no longer computed.
nevercaught <- statcatch %>%
  group_by(Survey, Quarter) %>%
  summarise(freqOc = mean(nnonNA / nhaul, na.rm = TRUE)) %>%
  arrange(Survey, Quarter) %>%
  filter(freqOc == 0) %>%
  select(Survey, Quarter)

# Aggregate to one row per survey/quarter/year, after removing the
# survey/quarter combinations that never caught an eel.
# NOTE: the order of expressions inside summarize() is load-bearing --
# CatchWgtHour is computed FIRST so that its weights come from the
# original per-row nhaul column, which the next line then overwrites
# with sum(nhaul). Do not reorder.
statcatch_nonnull <- statcatch %>% 
  anti_join(nevercaught) %>%
  group_by(Quarter,Year,Survey) %>%
  summarize(nnonNA=sum(nnonNA),
            CatchWgtHour=weighted.mean(CatchWgtHour,nhaul,na.rm=TRUE),
            nhaul=sum(nhaul),
            number=sum(number))
  
  
  
# Fraction of hauls that caught at least one eel, per survey,
# coloured by quarter.
statcatch_nonnull %>%
  ggplot(aes(x = Year, y = nnonNA / nhaul, col = as.factor(Quarter))) +
  geom_line() +
  facet_wrap(~Survey, scales = "free") +
  ylab("frequence non null haul")

# Catch weight per hour (CPUE) over time, per survey,
# coloured by quarter.
statcatch_nonnull %>%
  ggplot(aes(x = Year, y = CatchWgtHour, col = as.factor(Quarter))) +
  geom_line() +
  facet_wrap(~Survey, scales = "free") +
  ylab("CPUE")

# Total number of eels caught over time, per survey,
# coloured by quarter.
statcatch_nonnull %>%
  ggplot(aes(x = Year, y = number, col = as.factor(Quarter))) +
  geom_line() +
  facet_wrap(~Survey, scales = "free") +
  ylab("number caught")

The temporal trends are very noisy, except for BTS quarter 3, for which an exponential decrease is visible (apart from a few odd zeros at the beginning of the time series). Interestingly, BITS quarter 4 is variable but stable until 2007 and then declines very fast. This seems consistent with NS-IBTS quarter 3, so perhaps something is happening there. FR-CGFS is very noisy and the beginning of the time series is odd (until 1995).

Spatial distribution of catches

Here are the maps of catches for the survey/quarter combinations that catch at least 10 eels on average.

library(leaflet)
library(crosstalk)

# Haul positions restricted to the survey/quarter combinations kept in
# surveym10; catch weight converted to kg (divided by 1000).
subpos <- pos %>%
  right_join(surveym10) %>%
  mutate(SurveyQuart = paste(Survey, Quarter)) %>%
  mutate(CatchWgt = CatchWgt / 1000)

# Crosstalk-shared data for the interactive widgets below;
# hauls at (-9, -9) (missing position code) are excluded.
shared_data <- SharedData$new(
  subpos %>% filter(!(HaulLat == -9 & HaulLong == -9))
)

# Drop-down selector to pick a survey/quarter on the map.
filter_select('survquart', label = 'Survey Quarter',
              shared_data, group = ~SurveyQuart)

# Colour scale from 0 up to the largest currently-selected catch
# weight (with a floor of 1 so the scale is never degenerate).
selected_max <- max(
  shared_data$data(withSelection = TRUE) %>%
    filter(selected_) %>%
    select(CatchWgt) %>%
    pull(),
  na.rm = TRUE
)
pal <- colorNumeric("viridis", c(0, max(selected_max, 1)))

leaflet(shared_data) %>%
  addTiles() %>%
  addCircleMarkers(lng = ~HaulLong,
                   lat = ~HaulLat,
                   color = ~pal(CatchWgt))